bitkeeper revision 1.754 (403e554b5Rm0IFijoz1AZr8EH1Ek7A)
author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 26 Feb 2004 20:21:31 +0000 (20:21 +0000)
committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 26 Feb 2004 20:21:31 +0000 (20:21 +0000)
Many files:
  Clean up domain building tools to play nicely with a tightened-up pagetable interface.

tools/xc/lib/xc_linux_build.c
tools/xc/lib/xc_linux_restore.c
tools/xc/lib/xc_linux_save.c
tools/xc/lib/xc_netbsd_build.c
tools/xc/lib/xc_private.c
tools/xc/lib/xc_private.h
xen/common/domain.c

index 2ea7d00096748fe28afc4102a5ea364ec594ebef..6c6c1c94496f06362c29592f728288587892ebf4 100644 (file)
@@ -87,7 +87,7 @@ static int copy_to_domain_page(int pm_handle,
                                unsigned long dst_pfn, 
                                void *src_page)
 {
-    void *vaddr = map_pfn(pm_handle, dst_pfn);
+    void *vaddr = map_pfn_writeable(pm_handle, dst_pfn);
     if ( vaddr == NULL )
         return -1;
     memcpy(vaddr, src_page, PAGE_SIZE);
@@ -106,13 +106,13 @@ static int setup_guestos(int xc_handle,
                          const char *cmdline,
                          unsigned long shared_info_frame)
 {
-    l1_pgentry_t *vl1tab;
-    l2_pgentry_t *vl2tab;
+    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
+    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
     unsigned long *page_array = NULL;
     mmu_update_t *pgt_update_arr = NULL, *pgt_updates = NULL;
     int alloc_index, num_pt_pages;
-    unsigned long l2tab, l2e, l1e=0;
-    unsigned long l1tab = 0;
+    unsigned long l2tab;
+    unsigned long l1tab;
     unsigned long num_pgt_updates = 0;
     unsigned long count, pt_start, i, j;
     unsigned long initrd_addr = 0, initrd_len = 0;
@@ -126,7 +126,7 @@ static int setup_guestos(int xc_handle,
     if ( (pm_handle = init_pfn_mapper()) < 0 )
         goto error_out;
 
-    pgt_updates = malloc((tot_pages + 1024) * 3 * sizeof(mmu_update_t));
+    pgt_updates = malloc((tot_pages + 1) * sizeof(mmu_update_t));
     page_array = malloc(tot_pages * sizeof(unsigned long));
     pgt_update_arr = pgt_updates;
     if ( (pgt_update_arr == NULL) || (page_array == NULL) )
@@ -210,60 +210,31 @@ static int setup_guestos(int xc_handle,
     alloc_index--;
     builddomain->ctxt.pt_base = l2tab;
 
-    /*
-     * Pin down l2tab addr as page dir page - causes hypervisor to provide
-     * correct protection for the page
-     */ 
-    pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
-    pgt_updates->val = MMUEXT_PIN_L2_TABLE;
-    pgt_updates++;
-    num_pgt_updates++;
-
     /* Initialise the page tables. */
-    if ( (vl2tab = map_pfn(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
+    if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
         goto error_out;
     memset(vl2tab, 0, PAGE_SIZE);
-    unmap_pfn(pm_handle, vl2tab);
-    l2e = l2tab + (l2_table_offset(virt_load_addr)*sizeof(l2_pgentry_t));
+    vl2e = &vl2tab[l2_table_offset(virt_load_addr)];
     for ( count = 0; count < tot_pages; count++ )
     {    
-        if ( (l1e & (PAGE_SIZE-1)) == 0 )
+        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
         {
-            l1tab = page_array[alloc_index] << PAGE_SHIFT;
-            if ( (vl1tab = map_pfn(pm_handle, l1tab >> PAGE_SHIFT)) == NULL )
+            l1tab = page_array[alloc_index--] << PAGE_SHIFT;
+            if ( vl1tab != NULL )
+                unmap_pfn(pm_handle, vl1tab);
+            if ( (vl1tab = map_pfn_writeable(pm_handle, 
+                                             l1tab >> PAGE_SHIFT)) == NULL )
                 goto error_out;
             memset(vl1tab, 0, PAGE_SIZE);
-            unmap_pfn(pm_handle, vl1tab);
-            alloc_index--;
-  
-            l1e = l1tab + (l1_table_offset(virt_load_addr+(count<<PAGE_SHIFT))*
-                           sizeof(l1_pgentry_t));
-
-            /* Make appropriate entry in the page directory. */
-            pgt_updates->ptr = l2e;
-            pgt_updates->val = l1tab | L2_PROT;
-            pgt_updates++;
-            num_pgt_updates++;
-            l2e += sizeof(l2_pgentry_t);
+            vl1e = &vl1tab[l1_table_offset(virt_load_addr + 
+                                           (count<<PAGE_SHIFT))];
+            *vl2e++ = l1tab | L2_PROT;
         }
 
-        if ( count < pt_start )
-        {
-            pgt_updates->ptr = l1e;
-            pgt_updates->val = (page_array[count] << PAGE_SHIFT) | L1_PROT;
-            pgt_updates++;
-            num_pgt_updates++;
-            l1e += sizeof(l1_pgentry_t);
-        }
-        else
-        {
-            pgt_updates->ptr = l1e;
-            pgt_updates->val = 
-                ((page_array[count] << PAGE_SHIFT) | L1_PROT) & ~_PAGE_RW;
-            pgt_updates++;
-            num_pgt_updates++;
-            l1e += sizeof(l1_pgentry_t);
-        }
+        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+        if ( count >= pt_start )
+            *vl1e &= ~_PAGE_RW;
+        vl1e++;
 
         pgt_updates->ptr = 
             (page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
@@ -271,11 +242,22 @@ static int setup_guestos(int xc_handle,
         pgt_updates++;
         num_pgt_updates++;
     }
+    unmap_pfn(pm_handle, vl1tab);
+    unmap_pfn(pm_handle, vl2tab);
+
+    /*
+     * Pin down l2tab addr as page dir page - causes hypervisor to provide
+     * correct protection for the page
+     */ 
+    pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
+    pgt_updates->val = MMUEXT_PIN_L2_TABLE;
+    pgt_updates++;
+    num_pgt_updates++;
 
     *virt_startinfo_addr =
         virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
 
-    start_info = map_pfn(pm_handle, page_array[alloc_index-1]);
+    start_info = map_pfn_writeable(pm_handle, page_array[alloc_index-1]);
     memset(start_info, 0, sizeof(*start_info));
     start_info->pt_base     = virt_load_addr + ((tot_pages-1) << PAGE_SHIFT);
     start_info->mod_start   = initrd_addr;
@@ -289,7 +271,7 @@ static int setup_guestos(int xc_handle,
     unmap_pfn(pm_handle, start_info);
 
     /* shared_info page starts its life empty. */
-    shared_info = map_pfn(pm_handle, shared_info_frame);
+    shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
     memset(shared_info, 0, PAGE_SIZE);
     unmap_pfn(pm_handle, shared_info);
 
index be65a9e36737ee3729bd75329dfc8a6d115c89ec..74e7641bed19bc6d6062170272b8b50e86e80dc4 100644 (file)
@@ -132,7 +132,7 @@ int xc_linux_restore(int xc_handle,
     unsigned long *pfn_type = NULL;
 
     /* A temporary mapping, and a copy, of one frame of guest memory. */
-    unsigned long *ppage, page[1024];
+    unsigned long *ppage;
 
     /* A copy of the pfn-to-mfn table frame list. */
     unsigned long pfn_to_mfn_frame_list[1024];
@@ -241,7 +241,7 @@ int xc_linux_restore(int xc_handle,
         goto out;
 
     /* Copy saved contents of shared-info page. No checking needed. */
-    ppage = map_pfn(pm_handle, shared_info_frame);
+    ppage = map_pfn_writeable(pm_handle, shared_info_frame);
     memcpy(ppage, shared_info, PAGE_SIZE);
     unmap_pfn(pm_handle, ppage);
 
@@ -270,55 +270,42 @@ int xc_linux_restore(int xc_handle,
 
         mfn = pfn_to_mfn_table[i];
 
-        if ( !checked_read(gfd, page, PAGE_SIZE) )
+        ppage = map_pfn_writeable(pm_handle, mfn);
+
+        if ( !checked_read(gfd, ppage, PAGE_SIZE) )
         {
             ERROR("Error when reading from state file");
             goto out;
         }
 
-        ppage = map_pfn(pm_handle, mfn);
-        switch ( pfn_type[i] )
+        if ( pfn_type[i] == L1TAB )
         {
-        case L1TAB:
-            memset(ppage, 0, PAGE_SIZE);
-            if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
-                                (mfn<<PAGE_SHIFT) | MMU_EXTENDED_COMMAND,
-                                MMUEXT_PIN_L1_TABLE) )
-                goto out;
             for ( j = 0; j < 1024; j++ )
             {
-                if ( page[j] & _PAGE_PRESENT )
+                if ( ppage[j] & _PAGE_PRESENT )
                 {
-                    if ( (pfn = page[j] >> PAGE_SHIFT) >= nr_pfns )
+                    if ( (pfn = ppage[j] >> PAGE_SHIFT) >= nr_pfns )
                     {
                         ERROR("Frame number in page table is out of range");
                         goto out;
                     }
-                    if ( (pfn_type[pfn] != NONE) && (page[j] & _PAGE_RW) )
+                    if ( (pfn_type[pfn] != NONE) && (ppage[j] & _PAGE_RW) )
                     {
                         ERROR("Write access requested for a restricted frame");
                         goto out;
                     }
-                    page[j] &= (PAGE_SIZE - 1) & ~(_PAGE_GLOBAL | _PAGE_PAT);
-                    page[j] |= pfn_to_mfn_table[pfn] << PAGE_SHIFT;
+                    ppage[j] &= (PAGE_SIZE - 1) & ~(_PAGE_GLOBAL | _PAGE_PAT);
+                    ppage[j] |= pfn_to_mfn_table[pfn] << PAGE_SHIFT;
                 }
-                if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
-                                    (mfn<<PAGE_SHIFT)+(j*sizeof(l1_pgentry_t)),
-                                    page[j]) )
-                    goto out;
             }
-            break;
-        case L2TAB:
-            memset(ppage, 0, PAGE_SIZE);
-            if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
-                                (mfn<<PAGE_SHIFT) | MMU_EXTENDED_COMMAND,
-                                MMUEXT_PIN_L2_TABLE) )
-                goto out;
+        }
+        else if ( pfn_type[i] == L2TAB )
+        {
             for ( j = 0; j < (HYPERVISOR_VIRT_START>>L2_PAGETABLE_SHIFT); j++ )
             {
-                if ( page[j] & _PAGE_PRESENT )
+                if ( ppage[j] & _PAGE_PRESENT )
                 {
-                    if ( (pfn = page[j] >> PAGE_SHIFT) >= nr_pfns )
+                    if ( (pfn = ppage[j] >> PAGE_SHIFT) >= nr_pfns )
                     {
                         ERROR("Frame number in page table is out of range");
                         goto out;
@@ -328,27 +315,12 @@ int xc_linux_restore(int xc_handle,
                         ERROR("Page table mistyping");
                         goto out;
                     }
-                    /* Haven't reached the L1 table yet. Ensure it is safe! */
-                    if ( pfn > i )
-                    {
-                        unsigned long **l1 = map_pfn(pm_handle, 
-                                                     pfn_to_mfn_table[pfn]);
-                        memset(l1, 0, PAGE_SIZE);
-                        unmap_pfn(pm_handle, l1);
-                    }
-                    page[j] &= (PAGE_SIZE - 1) & ~(_PAGE_GLOBAL | _PAGE_PSE);
-                    page[j] |= pfn_to_mfn_table[pfn] << PAGE_SHIFT;
+                    ppage[j] &= (PAGE_SIZE - 1) & ~(_PAGE_GLOBAL | _PAGE_PSE);
+                    ppage[j] |= pfn_to_mfn_table[pfn] << PAGE_SHIFT;
                 }
-                if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
-                                    (mfn<<PAGE_SHIFT)+(j*sizeof(l2_pgentry_t)),
-                                    page[j]) )
-                    goto out;
             }
-            break;
-        default:
-            memcpy(ppage, page, PAGE_SIZE);
-            break;
         }
+
         unmap_pfn(pm_handle, ppage);
 
         if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
@@ -356,6 +328,31 @@ int xc_linux_restore(int xc_handle,
             goto out;
     }
 
+    /*
+     * Pin page tables. Do this after writing to them as otherwise Xen
+     * will barf when doing the type-checking.
+     */
+    for ( i = 0; i < nr_pfns; i++ )
+    {
+        if ( pfn_type[i] == L1TAB )
+        {
+            if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
+                                (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 
+                                MMU_EXTENDED_COMMAND,
+                                MMUEXT_PIN_L1_TABLE) )
+                goto out;
+        }
+        else if ( pfn_type[i] == L2TAB )
+        {
+            if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
+                                (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 
+                                MMU_EXTENDED_COMMAND,
+                                MMUEXT_PIN_L2_TABLE) )
+                goto out;
+        }
+    }
+
+
     if ( flush_mmu_updates(xc_handle, mmu_updates, &mmu_update_idx) )
         goto out;
 
@@ -369,7 +366,7 @@ int xc_linux_restore(int xc_handle,
         goto out;
     }
     ctxt.i386_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
-    p_srec = map_pfn(pm_handle, mfn);
+    p_srec = map_pfn_writeable(pm_handle, mfn);
     p_srec->resume_info.nr_pages    = nr_pfns;
     p_srec->resume_info.shared_info = shared_info_frame << PAGE_SHIFT;
     p_srec->resume_info.flags       = 0;
@@ -412,7 +409,7 @@ int xc_linux_restore(int xc_handle,
             ERROR("PFN-to-MFN frame number is bad");
             goto out;
         }
-        ppage = map_pfn(pm_handle, pfn_to_mfn_table[pfn]);
+        ppage = map_pfn_writeable(pm_handle, pfn_to_mfn_table[pfn]);
         memcpy(ppage, &pfn_to_mfn_table[i], copy_size);        
         unmap_pfn(pm_handle, ppage);
     }
index 40fc06acd5f71eb2cfc8dfad6c6297fb8d7224b1..1695bd63c08b0ce3b3edb17da2be997931da5f52 100644 (file)
@@ -189,7 +189,7 @@ int xc_linux_save(int xc_handle,
     }
 
     /* If the suspend-record MFN is okay then grab a copy of it to @srec. */
-    p_srec = map_pfn(pm_handle, ctxt.i386_ctxt.esi);
+    p_srec = map_pfn_readonly(pm_handle, ctxt.i386_ctxt.esi);
     memcpy(&srec, p_srec, sizeof(srec));
     unmap_pfn(pm_handle, p_srec);
 
@@ -206,7 +206,8 @@ int xc_linux_save(int xc_handle,
     }
 
     /* Grab a copy of the pfn-to-mfn table frame list. */
-    p_pfn_to_mfn_frame_list = map_pfn(pm_handle, srec.pfn_to_mfn_frame_list);
+    p_pfn_to_mfn_frame_list = map_pfn_readonly(
+        pm_handle, srec.pfn_to_mfn_frame_list);
     memcpy(pfn_to_mfn_frame_list, p_pfn_to_mfn_frame_list, PAGE_SIZE);
     unmap_pfn(pm_handle, p_pfn_to_mfn_frame_list);
 
@@ -243,7 +244,7 @@ int xc_linux_save(int xc_handle,
             }
             if ( pfn_to_mfn_frame != NULL )
                 unmap_pfn(pm_handle, pfn_to_mfn_frame);
-            pfn_to_mfn_frame = map_pfn(pm_handle, mfn);
+            pfn_to_mfn_frame = map_pfn_readonly(pm_handle, mfn);
         }
         
         mfn = pfn_to_mfn_frame[i & 1023];
@@ -306,7 +307,7 @@ int xc_linux_save(int xc_handle,
     }
 
     /* Start writing out the saved-domain record. */
-    ppage = map_pfn(pm_handle, shared_info_frame);
+    ppage = map_pfn_readonly(pm_handle, shared_info_frame);
     if ( !checked_write(gfd, "XenoLinuxSuspend",    16) ||
          !checked_write(gfd, name,                  sizeof(name)) ||
          !checked_write(gfd, &srec.nr_pfns,         sizeof(unsigned long)) ||
@@ -335,7 +336,7 @@ int xc_linux_save(int xc_handle,
 
         mfn = pfn_to_mfn_table[i];
 
-        ppage = map_pfn(pm_handle, mfn);
+        ppage = map_pfn_readonly(pm_handle, mfn);
         memcpy(page, ppage, PAGE_SIZE);
         unmap_pfn(pm_handle, ppage);
 
index 64ba336efc1a13e2ea063f7527745c7ac659077f..23df4afb725eeb6386e86199e4c7547b69cdb2c8 100644 (file)
@@ -84,13 +84,13 @@ static int setup_guestos(int xc_handle,
                          const char *cmdline,
                          unsigned long shared_info_frame)
 {
-    l1_pgentry_t *vl1tab;
-    l2_pgentry_t *vl2tab;
+    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
+    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
     unsigned long *page_array = NULL;
     mmu_update_t *pgt_update_arr = NULL, *pgt_updates = NULL;
     int alloc_index, num_pt_pages;
-    unsigned long l2tab, l2e, l1e=0;
-    unsigned long l1tab = 0;
+    unsigned long l2tab;
+    unsigned long l1tab;
     unsigned long num_pgt_updates = 0;
     unsigned long count, pt_start;
     unsigned long symtab_addr = 0, symtab_len = 0;
@@ -104,7 +104,7 @@ static int setup_guestos(int xc_handle,
     if ( (pm_handle = init_pfn_mapper()) < 0 )
         goto error_out;
 
-    pgt_updates = malloc((tot_pages + 1024) * 3 * sizeof(mmu_update_t));
+    pgt_updates = malloc((tot_pages + 1) * sizeof(mmu_update_t));
     page_array = malloc(tot_pages * sizeof(unsigned long));
     pgt_update_arr = pgt_updates;
     if ( (pgt_update_arr == NULL) || (page_array == NULL) )
@@ -145,61 +145,31 @@ static int setup_guestos(int xc_handle,
     alloc_index--;
     builddomain->ctxt.pt_base = l2tab;
 
-    /*
-     * Pin down l2tab addr as page dir page - causes hypervisor to provide
-     * correct protection for the page
-     */ 
-    pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
-    pgt_updates->val = MMUEXT_PIN_L2_TABLE;
-    pgt_updates++;
-    num_pgt_updates++;
-
     /* Initialise the page tables. */
-    if ( (vl2tab = map_pfn(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
+    if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
         goto error_out;
     memset(vl2tab, 0, PAGE_SIZE);
-    unmap_pfn(pm_handle, vl2tab);
-    l2e = l2tab + (l2_table_offset(*virt_load_addr)*sizeof(l2_pgentry_t));
+    vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
     for ( count = 0; count < tot_pages; count++ )
     {
-        if ( (l1e & (PAGE_SIZE-1)) == 0 )
+        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
         {
-            l1tab = page_array[alloc_index] << PAGE_SHIFT;
-            if ( (vl1tab = map_pfn(pm_handle, l1tab >> PAGE_SHIFT)) == NULL )
+            l1tab = page_array[alloc_index--] << PAGE_SHIFT;
+            if ( vl1tab != NULL )
+                unmap_pfn(pm_handle, vl1tab);
+            if ( (vl1tab = map_pfn_writeable(pm_handle,
+                                             l1tab >> PAGE_SHIFT)) == NULL )
                 goto error_out;
             memset(vl1tab, 0, PAGE_SIZE);
-            unmap_pfn(pm_handle, vl1tab);
-            alloc_index--;
-  
-            l1e = l1tab + (l1_table_offset(*virt_load_addr + 
-                                           (count<<PAGE_SHIFT)) *
-                           sizeof(l1_pgentry_t));
-
-            /* Make appropriate entry in the page directory. */
-            pgt_updates->ptr = l2e;
-            pgt_updates->val = l1tab | L2_PROT;
-            pgt_updates++;
-            num_pgt_updates++;
-            l2e += sizeof(l2_pgentry_t);
+            vl1e = &vl1tab[l1_table_offset(*virt_load_addr + 
+                                           (count<<PAGE_SHIFT))];
+            *vl2e++ = l1tab | L2_PROT;
         }
 
-        if ( count < pt_start )
-        {
-            pgt_updates->ptr = l1e;
-            pgt_updates->val = (page_array[count] << PAGE_SHIFT) | L1_PROT;
-            pgt_updates++;
-            num_pgt_updates++;
-            l1e += sizeof(l1_pgentry_t);
-        }
-        else
-        {
-            pgt_updates->ptr = l1e;
-            pgt_updates->val = 
-                ((page_array[count] << PAGE_SHIFT) | L1_PROT) & ~_PAGE_RW;
-            pgt_updates++;
-            num_pgt_updates++;
-            l1e += sizeof(l1_pgentry_t);
-        }
+        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+        if ( count >= pt_start )
+            *vl1e &= ~_PAGE_RW;
+        vl1e++;
 
         pgt_updates->ptr = 
             (page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
@@ -207,11 +177,22 @@ static int setup_guestos(int xc_handle,
         pgt_updates++;
         num_pgt_updates++;
     }
+    unmap_pfn(pm_handle, vl1tab);
+    unmap_pfn(pm_handle, vl2tab);
+
+    /*
+     * Pin down l2tab addr as page dir page - causes hypervisor to provide
+     * correct protection for the page
+     */ 
+    pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
+    pgt_updates->val = MMUEXT_PIN_L2_TABLE;
+    pgt_updates++;
+    num_pgt_updates++;
 
     *virt_startinfo_addr =
         *virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
 
-    start_info = map_pfn(pm_handle, page_array[alloc_index-1]);
+    start_info = map_pfn_writeable(pm_handle, page_array[alloc_index-1]);
     memset(start_info, 0, sizeof(*start_info));
     start_info->pt_base     = *virt_load_addr + ((tot_pages-1) << PAGE_SHIFT);
     start_info->mod_start   = symtab_addr;
@@ -225,7 +206,7 @@ static int setup_guestos(int xc_handle,
     unmap_pfn(pm_handle, start_info);
 
     /* shared_info page starts its life empty. */
-    shared_info = map_pfn(pm_handle, shared_info_frame);
+    shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
     memset(shared_info, 0, PAGE_SIZE);
     unmap_pfn(pm_handle, shared_info);
 
@@ -542,8 +523,9 @@ loadelfimage(gzFile kernel_gfd, int pm_handle, unsigned long *page_array,
                     goto out;
                 }
                 curpos += c;
-                vaddr = map_pfn(pm_handle, page_array[(iva - *virt_load_addr)
-                                                     >> PAGE_SHIFT]);
+                vaddr = map_pfn_writeable(pm_handle, 
+                                          page_array[(iva - *virt_load_addr)
+                                                    >> PAGE_SHIFT]);
                 if ( vaddr == NULL )
                 {
                     ERROR("Couldn't map guest memory");
@@ -650,8 +632,9 @@ loadelfimage(gzFile kernel_gfd, int pm_handle, unsigned long *page_array,
                 }
                 curpos += c;
 
-                vaddr = map_pfn(pm_handle, page_array[(maxva - *virt_load_addr)
-                                                     >> PAGE_SHIFT]);
+                vaddr = map_pfn_writeable(pm_handle, 
+                                          page_array[(maxva - *virt_load_addr)
+                                                    >> PAGE_SHIFT]);
                 if ( vaddr == NULL )
                 {
                     ERROR("Couldn't map guest memory");
@@ -696,8 +679,9 @@ loadelfimage(gzFile kernel_gfd, int pm_handle, unsigned long *page_array,
         c = PAGE_SIZE - (symva & (PAGE_SIZE - 1));
         if ( c > s - i )
             c = s - i;
-        vaddr = map_pfn(pm_handle, page_array[(symva - *virt_load_addr)
-                                             >> PAGE_SHIFT]);
+        vaddr = map_pfn_writeable(pm_handle, 
+                                  page_array[(symva - *virt_load_addr)
+                                            >> PAGE_SHIFT]);
         if ( vaddr == NULL )
         {
             ERROR("Couldn't map guest memory");
index 7fa201bc903d71066c353463dff792988b630b6d..4331cc943c29ad012bfe15b4f801ebd2d4c9796e 100644 (file)
@@ -16,7 +16,7 @@ int close_pfn_mapper(int pm_handle)
     return close(pm_handle);
 }
 
-void *map_pfn(int pm_handle, unsigned long pfn)
+void *map_pfn_writeable(int pm_handle, unsigned long pfn)
 {
     void *vaddr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
                        MAP_SHARED, pm_handle, pfn << PAGE_SHIFT);
@@ -25,6 +25,15 @@ void *map_pfn(int pm_handle, unsigned long pfn)
     return vaddr;
 }
 
+void *map_pfn_readonly(int pm_handle, unsigned long pfn)
+{
+    void *vaddr = mmap(NULL, PAGE_SIZE, PROT_READ,
+                       MAP_SHARED, pm_handle, pfn << PAGE_SHIFT);
+    if ( vaddr == MAP_FAILED )
+        return NULL;
+    return vaddr;
+}
+
 void unmap_pfn(int pm_handle, void *vaddr)
 {
     (void)munmap(vaddr, PAGE_SIZE);
index 2b5d5604cdbce5cdf694a4234dac2df488a427d6..c08a2564ec466383b8b832198812ebb2cc13842c 100644 (file)
@@ -44,8 +44,8 @@
 #define PAGE_SIZE               (1UL << PAGE_SHIFT)
 #define PAGE_MASK               (~(PAGE_SIZE-1))
 
-typedef struct { unsigned long l1_lo; } l1_pgentry_t;
-typedef struct { unsigned long l2_lo; } l2_pgentry_t;
+typedef unsigned long l1_pgentry_t;
+typedef unsigned long l2_pgentry_t;
 
 #define l1_table_offset(_a) \
           (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
@@ -149,7 +149,8 @@ static inline int do_block_io_op(int xc_handle, block_io_op_t *op)
  */
 int init_pfn_mapper(void);
 int close_pfn_mapper(int pm_handle);
-void *map_pfn(int pm_handle, unsigned long pfn);
+void *map_pfn_writeable(int pm_handle, unsigned long pfn);
+void *map_pfn_readonly(int pm_handle, unsigned long pfn);
 void unmap_pfn(int pm_handle, void *vaddr);
 
 #endif /* __XC_PRIVATE_H__ */
index 597429b3575f026f7ede0321330b453e201cc72e..8186248dc827ac80fb3a89f7e965b43a4f87df02 100644 (file)
@@ -160,11 +160,11 @@ void __kill_domain(struct task_struct *p)
     write_lock_irqsave(&tasklist_lock, flags);
     pp = &task_list;                       /* Delete from task_list. */
     while ( *pp != p ) 
-        *pp = (*pp)->next_list;
+        pp = &(*pp)->next_list;
     *pp = p->next_list;
     pp = &task_hash[TASK_HASH(p->domain)]; /* Delete from task_hash. */
     while ( *pp != p ) 
-        *pp = (*pp)->next_hash;
+        pp = &(*pp)->next_hash;
     *pp = p->next_hash;
     write_unlock_irqrestore(&tasklist_lock, flags);